set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
p->state = STATE_IOREQ_READY;
- evtchn_send(iopacket_port(v->domain));
+ evtchn_send(iopacket_port(v));
vmx_wait_io();
if(dir==IOREQ_READ){ //read
*val=p->u.data;
set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
p->state = STATE_IOREQ_READY;
- evtchn_send(iopacket_port(v->domain));
+ evtchn_send(iopacket_port(v));
vmx_wait_io();
if(dir==IOREQ_READ){ //read
#include <xen/mm.h>
#include <public/arch-ia64.h>
#include <asm/hvm/vioapic.h>
+#include <public/event_channel.h>
/* Global flag: nonzero once the Intel VMX (hardware virtualization) feature
 * has been enabled; checked elsewhere before taking VMX code paths. */
u32 vmx_enabled = 0;
{
vpd_t *vpd;
- /* Allocate resources for vcpu 0 */
- //memset(&v->arch.arch_vmx, 0, sizeof(struct arch_vmx_struct));
-
vpd = alloc_vpd();
ASSERT(vpd);
/*
 * Set up the I/O-emulation platform state for a VTi (non-privileged) domain:
 * record the shared I/O page's VA, set the PIB base, mask all event channels
 * except the I/O packet port, and init virtual IRQ lines and the vIOAPIC.
 * (Patch view: '-' lines are removed — the per-domain shared-page temp and
 * the domain-wide iopacket_port() unmask move elsewhere, see vmx_do_launch.)
 */
void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c)
{
- shared_iopage_t *sp;
-
ASSERT(d != dom0); /* only for non-privileged vti domain */
/* Map the guest-physical I/O page to a host VA for emulation access. */
d->arch.vmx_platform.shared_page_va =
__va(__gpa_to_mpa(d, IO_PAGE_START));
- sp = get_sp(d);
- //memset((char *)sp,0,PAGE_SIZE);
/* TEMP */
/* NOTE(review): hard-coded PIB base — presumably the standard ia64
 * processor-interrupt-block address; confirm against platform spec. */
d->arch.vmx_platform.pib_base = 0xfee00000UL;
/* Only open one port for I/O and interrupt emulation */
/* Start with every event channel masked ... */
memset(&d->shared_info->evtchn_mask[0], 0xff,
sizeof(d->shared_info->evtchn_mask));
- clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
/* Initialize the virtual interrupt lines */
vmx_virq_line_init(d);
hvm_vioapic_init(d);
}
+/*
+ * Per-vcpu launch setup for a VMX domain: bind this vcpu's I/O request
+ * event channel to the vcpu, unmask it, and load the region registers.
+ * Replaces the former domain-wide port unmask done in vmx_setup_platform.
+ */
+void vmx_do_launch(struct vcpu *v)
+{
+    /* Route the vcpu's I/O packet port to this vcpu; a failure here
+     * leaves I/O emulation undeliverable, so crash the domain. */
+    if (evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0) {
+        printk("VMX domain bind port %d to vcpu %d failed!\n",
+               iopacket_port(v), v->vcpu_id);
+        domain_crash_synchronous();
+    }
+
+    /* Unmask the per-vcpu I/O packet port (clear its mask bit). */
+    clear_bit(iopacket_port(v),
+              &v->domain->shared_info->evtchn_mask[0]);
+    vmx_load_all_rr(v);
+}
{
struct vcpu *v = current;
struct domain *d = v->domain;
- int port = iopacket_port(d);
+ int port = iopacket_port(v);
do {
if (!test_bit(port,
struct domain *d = v->domain;
extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu,
unsigned long *pend_irr);
- int port = iopacket_port(d);
+ int port = iopacket_port(v);
/* I/O emulation is atomic, so it's impossible to see execution flow
* out of vmx_wait_io, when guest is still waiting for response.
context_saved(prev);
if (VMX_DOMAIN(current)) {
- vmx_load_all_rr(current);
+ vmx_do_launch(current);
} else {
load_region_regs(current);
vcpu_load_kernel_regs(current);
return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
}
+/*
+ * Return the event-channel port used for this vcpu's I/O request packets.
+ * Changed from a domain-global port (sp_global.eport) to a per-vcpu port
+ * read from the vcpu's vcpu_iodata slot via get_vio().
+ */
-static inline int iopacket_port(struct domain *d)
+static inline int iopacket_port(struct vcpu *v)
{
- return ((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->sp_global.eport;
+ return get_vio(v->domain, v->vcpu_id)->vp_eport;
}
static inline shared_iopage_t *get_sp(struct domain *d)